From fb0c3a8a30906005afc8c976b5c469d86496871e Mon Sep 17 00:00:00 2001 From: Dario Faggioli Date: Tue, 26 Apr 2016 18:56:56 +0200 Subject: [PATCH] xen: sched: avoid spuriously re-enabling IRQs in csched2_switch_sched() Interrupts are already disabled when calling the hook (from schedule_cpu_switch()), so we must use spin_lock() and spin_unlock(). Add an ASSERT(), so we will notice if this code and its caller get out of sync with respect to disabling interrupts (and add one at the same exact occurrence of this pattern in Credit1 too). Signed-off-by: Dario Faggioli Reviewed-by: George Dunlap Release-acked-by: Wei Liu --- xen/common/sched_credit.c | 1 + xen/common/sched_credit2.c | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c index db4d42ae3e..a38a63d9aa 100644 --- a/xen/common/sched_credit.c +++ b/xen/common/sched_credit.c @@ -615,6 +615,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu, * schedule_cpu_switch()). It actually may or may not be the 'right' * one for this cpu, but that is ok for preventing races. */ + ASSERT(!local_irq_is_enabled()); spin_lock(&prv->lock); init_pdata(prv, pdata, cpu); spin_unlock(&prv->lock); diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c index f3b62acfca..f95e50969a 100644 --- a/xen/common/sched_credit2.c +++ b/xen/common/sched_credit2.c @@ -2238,7 +2238,8 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu, * And owning exactly that one (the lock of the old scheduler of this * cpu) is what is necessary to prevent races. */ - spin_lock_irq(&prv->lock); + ASSERT(!local_irq_is_enabled()); + spin_lock(&prv->lock); idle_vcpu[cpu]->sched_priv = vdata; @@ -2263,7 +2264,7 @@ csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu, smp_mb(); per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock; - spin_unlock_irq(&prv->lock); + spin_unlock(&prv->lock); } static void -- 2.30.2